#include <xeno/keyhandler.h>
#include <xeno/interrupt.h>
#include <xeno/segment.h>
+#include <asm/domain_page.h> /* TEST_READ_VALIDITY */
#if 1
+#define TEST_READ_VALIDITY
#define DPRINTK(_f, _a...) printk( _f , ## _a )
#else
#define DPRINTK(_f, _a...) ((void)0)
/* NOTE(review): diff hunk from the per-buffer end-of-I/O callback; the
 * enclosing function's head and tail are truncated, so this fragment is
 * documented rather than restyled. */
unsigned long flags;
pending_req_t *pending_req = bh->pending_req;
+ /* An error fails the entire request. */
+ if ( !uptodate )
+ {
+ DPRINTK("Buffer not up-to-date at end of operation\n");
+ pending_req->status = 1;
+ }
+#ifdef TEST_READ_VALIDITY
+ /* Debug-only read check: if the buffer still holds the 0xdeadbeef
+  * poison pattern written at submit time (see the read-path hunk
+  * later in this file), the device never overwrote it, i.e. the read
+  * silently returned stale data.  Assumes words 0 and 127 cover the
+  * poisoned sector — TODO confirm sector size vs word size here. */
+ else
+ {
+ unsigned long *buff = map_domain_mem(virt_to_phys(bh->b_data));
+ if ( (buff[ 0] == 0xdeadbeef) &&
+ (buff[127] == 0xdeadbeef) )
+ printk("A really fucked buffer at %ld\n", bh->b_rsector);
+ unmap_domain_mem(buff);
+ }
+#endif
+
/* Release the guest's buffer now the transfer for this segment is done. */
unlock_buffer(pending_req->domain,
virt_to_phys(bh->b_data),
bh->b_size,
if ( atomic_dec_and_test(&pending_req->pendcnt) )
{
/* Last outstanding segment of this request: report the ACCUMULATED
 * per-request status instead of only this buffer's uptodate flag,
 * so a failure in any one segment fails the whole request. */
make_response(pending_req->domain, pending_req->id,
- pending_req->operation, uptodate ? 0 : 1);
+ pending_req->operation, pending_req->status);
/* Recycle the pending_req slot under the producer lock. */
spin_lock_irqsave(&pend_prod_lock, flags);
pending_ring[pending_prod] = pending_req - pending_reqs;
PENDREQ_IDX_INC(pending_prod);
/* NOTE(review): the lines below belong to a different hunk — request
 * setup in the dispatch path; status starts clean for a new request. */
pending_req->domain = p;
pending_req->id = req->id;
pending_req->operation = operation;
+ pending_req->status = 0;
atomic_set(&pending_req->pendcnt, nr_psegs);
/* Now we pass each segment down to the real blkdev layer. */
}
else
{
+#ifdef TEST_READ_VALIDITY
+ /* Poison the first and last word of the buffer before issuing the
+  * read; the completion path checks whether the poison survived,
+  * which would indicate the device never filled the buffer. */
+ unsigned long *buff = map_domain_mem(phys_seg[i].buffer);
+ buff[ 0] = 0xdeadbeef;
+ buff[127] = 0xdeadbeef;
+ unmap_domain_mem(buff);
+#endif
bh->b_state = (1 << BH_Mapped) | (1 << BH_Read);
}
{
case XEN_BLOCK_READ:
case XEN_BLOCK_WRITE:
+ /* Propagate backend status: a non-zero status completes every
+  * buffer_head in this request chain as NOT up to date (b_end_io's
+  * second argument), instead of unconditionally claiming success. */
+ if ( bret->status )
+ printk(KERN_ALERT "Bad return from blkdev data request\n");
for ( bh = (struct buffer_head *)bret->id;
bh != NULL;
bh = next_bh )
{
/* Detach from the chain before completing: b_end_io may free or
 * recycle the bh, so b_reqnext must be saved first. */
next_bh = bh->b_reqnext;
bh->b_reqnext = NULL;
- bh->b_end_io(bh, 1);
+ bh->b_end_io(bh, !bret->status);
}
break;
case XEN_BLOCK_SEG_DELETE:
case XEN_BLOCK_PROBE_SEG:
case XEN_BLOCK_PROBE_BLK:
+ /* Control requests have no per-buffer completion; log any failure
+  * and release the caller spinning on xlblk_control_msg_pending. */
+ if ( bret->status )
+ printk(KERN_ALERT "Bad return from blkdev control request\n");
xlblk_control_msg_pending = 0;
break;
char *aligned_buf;
/* We copy from an aligned buffer, as interface needs sector alignment. */
/* get_free_page() returns an unsigned long, so cast to char * here and
 * back to unsigned long at free_page() to keep both calls well-typed. */
- aligned_buf = get_free_page(GFP_KERNEL);
+ aligned_buf = (char *)get_free_page(GFP_KERNEL);
if ( aligned_buf == NULL ) BUG();
/* NOTE(review): busy-waits until the response handler clears the flag;
 * presumably completion runs in interrupt context so this cannot
 * deadlock — confirm against the handler.  A sleeping wait would be
 * preferable if this path may block. */
xlblk_control_msg_pending = 1;
while ( xlblk_control_msg_pending ) barrier();
memcpy(buffer, aligned_buf, size);
- free_page(aligned_buf);
+ free_page((unsigned long)aligned_buf);
return 0;
}